rc = -ENOENT;
break;
}
+ /* XXX: Ugly: pull all of these checks into a separate helper function.
+ * Deliberately deferred for now so as not to interfere with the
+ * in-flight mem_paging patches. */
+ else if ( p2m_ram_shared == l1e_p2mt )
+ {
+ /* Unshare the page for RW foreign mappings */
+ if(l1e_get_flags(l1e) & _PAGE_RW)
+ {
+ rc = mem_sharing_unshare_page(pg_owner,
+ l1e_get_pfn(l1e),
+ 0);
+ if(rc) break;
+ }
+ }
okay = mod_l1_entry(va, l1e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
rc = -ENOENT;
break;
}
+ else if ( p2m_ram_shared == l2e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
+ break;
+ }
+
okay = mod_l2_entry(va, l2e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
rc = -ENOENT;
break;
}
+ else if ( p2m_ram_shared == l3e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
+ break;
+ }
rc = mod_l3_entry(va, l3e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
rc = -ENOENT;
break;
}
+ else if ( p2m_ram_shared == l4e_p2mt )
+ {
+ MEM_LOG("Unexpected attempt to map shared page.\n");
+ rc = -EINVAL;
+ break;
+ }
rc = mod_l4_entry(va, l4e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
}
+#define gfn_to_mfn_private(_d, _gfn, _p2mt) \
+ mfn_x(gfn_to_mfn_unshare(_d, _gfn, _p2mt, 1))
#define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t))
#define shared_entry_v1(t, e) \
if ( !act->pin )
{
+ p2m_type_t p2mt;
+
act->domid = ld->domain_id;
if ( sha1 )
act->gfn = sha1->frame;
else
act->gfn = sha2->full_page.frame;
- act->frame = gmfn_to_mfn(rd, act->gfn);
+ act->frame = (op->flags & GNTMAP_readonly) ?
+ gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
act->start = 0;
act->length = PAGE_SIZE;
act->is_sub_page = 0;
if ( rc != GNTST_okay )
goto undo_out;
}
- else if ( owner == rd )
+ else if ( owner == rd || owner == dom_cow )
{
if ( gnttab_host_mapping_get_page_type(op, ld, rd) &&
!get_page_type(pg, PGT_writable_page) )
struct gnttab_transfer gop;
unsigned long mfn;
unsigned int max_bitsize;
+ p2m_type_t p2mt;
for ( i = 0; i < count; i++ )
{
return -EFAULT;
}
- mfn = gmfn_to_mfn(d, gop.mfn);
+ mfn = gfn_to_mfn_private(d, gop.mfn, &p2mt);
/* Check the passed page frame for basic validity. */
if ( unlikely(!mfn_valid(mfn)) )
int is_sub_page;
struct domain *ignore;
s16 rc = GNTST_okay;
+ p2m_type_t p2mt;
*owning_domain = NULL;
else if ( sha1 )
{
act->gfn = sha1->frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
else if ( !(sha2->hdr.flags & GTF_sub_page) )
{
act->gfn = sha2->full_page.frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
else
{
act->gfn = sha2->sub_page.frame;
- grant_frame = gmfn_to_mfn(rd, act->gfn);
+ grant_frame = readonly ? gmfn_to_mfn(rd, act->gfn) :
+ gfn_to_mfn_private(rd, act->gfn, &p2mt);
is_sub_page = 1;
trans_page_off = sha2->sub_page.page_off;
trans_length = sha2->sub_page.length;
else
{
p2m_type_t p2mt;
- d_frame = mfn_x(gfn_to_mfn(dd, op->dest.u.gmfn, &p2mt));
+ d_frame = gfn_to_mfn_private(dd, op->dest.u.gmfn, &p2mt);
if ( p2m_is_paging(p2mt) )
{
p2m_mem_paging_populate(dd, op->dest.u.gmfn);
goto no_mem_4;
clear_page(t->shared_raw[i]);
}
-
+
for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
gnttab_create_shared_page(d, t, i);